Testing the different components of the envelope
Motion Clouds: testing components of the envelope¶
# --- Notebook setup: numerics, plotting and the MotionClouds package. ---
import numpy as np
np.set_printoptions(precision=3, suppress=True)  # compact array printing
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
import MotionClouds as mc
# Fourier-domain grids (fx, fy, ft) at the package's default resolution.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
#mc.recompute = True
mc.notebook = True  # embed figures/videos inline in the notebook
Testing the speed¶
Here is the link to the test page for the component Speed
# Speed component: multiply the colored spectral envelope by the speed
# envelope, save the figures and embed the resulting movie.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
color = mc.envelope_color(fx, fy, ft)
name = 'speed'
z = color*mc.envelope_speed(fx, fy, ft)
mc.figures(z, name)
mc.in_show_video(name)
Exploring the orientation component of the envelope around a grating.¶
Here is the link to the test page for the component Grating
# Grating component: colored spectral envelope times the Gabor envelope.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
color = mc.envelope_color(fx, fy, ft)
name = 'grating'
z = color * mc.envelope_gabor(fx, fy, ft)
mc.figures(z, name)
mc.in_show_video(name)
Here is the link to the test page for the component Radial
# Radial component: a Motion Cloud with orientation bandwidth B_theta.
B_theta = np.pi/8.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
name = 'radial'
mc.figures_MC(fx, fy, ft, name, B_theta=B_theta)
verbose = False  # NOTE(review): not used below -- appears to be leftover state
mc.in_show_video(name)
Testing the color¶
Here is the link to the test page for the component Color
# Color component alone: the colored (spectral) envelope without motion tuning.
name = 'color'
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
z = mc.envelope_color(fx, fy, ft)
mc.figures(z, name)
mc.in_show_video(name)
Testing competing Motion Clouds with psychopy
Analysis of a Discrimination Experiment¶
In the psychopy_competition.py script, we implemented an experiment to test whether one could discriminate the dominating motion in a sum of two motion clouds in opposite directions.
Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.
# --- Section setup (competition-experiment analysis): standard imports. ---
import numpy as np
np.set_printoptions(precision=3, suppress=True)  # compact array printing
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
Analysis, version 1¶
In a first version of the experiment, we only stored the results in a numpy file:
!ls ../data/competing_v1_*npy
import glob
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('../data/competing_v1_*npy'):
results = np.load(fn)
print 'experiment ', fn, ', # trials=', results.shape[1]
ax.scatter(results[1, :], results[0, :])
_ = ax.axis([0., 1., -1.1, 1.1])
_ = ax.set_xlabel('contrast')
_ = ax.set_ylabel('perceived direction')
# Re-plot the v1 data as "probability correct" per side. The loop-body
# indentation (lost in the notebook export) is restored here; the
# statements themselves are unchanged.
alpha = .3
fig = plt.figure(figsize=(12,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
data = []
for fn in glob.glob('../data/competing_v1_*npy'):
    results = np.load(fn)
    data_ = np.empty(results.shape)
    # lower stronger, detected lower = CORRECT is 1
    ax1.scatter(results[1, results[1, :]<.5],
                1.*(results[0, results[1, :]<.5]==-1),
                c='r', alpha=alpha)
    # copying data: contrast (from .5 to 1), correct
    data_[0, results[1, :]<.5] = 1. - results[1, results[1, :]<.5]
    data_[1, results[1, :]<.5] = 1.*(results[0, results[1, :]<.5]==-1)
    # upper stronger, detected lower = INCORRECT is 1
    ax1.scatter(results[1, results[1, :]>.5],
                1.*(results[0, results[1, :]>.5]==-1),
                c='b', alpha=alpha)
    # lower stronger, detected upper = INCORRECT is 1
    ax2.scatter(results[1, results[1, :]<.5],
                1.*(results[0, results[1, :]<.5]==1),
                c='b', alpha=alpha, marker='x')
    # upper stronger, detected upper = CORRECT is 1
    ax2.scatter(results[1, results[1, :]>.5],
                1.*(results[0, results[1, :]>.5]==1),
                c='r', alpha=alpha, marker='x')
    # copying data: contrast (from .5 to 1), correct
    data_[0, results[1, :]>=.5] = results[1, results[1, :]>=.5]
    data_[1, results[1, :]>=.5] = 1.*(results[0, results[1, :]>=.5]==1)
    data.append(data_)
for ax in [ax1, ax2]:
    ax.axis([0., 1., -.1, 1.1])
    _ = ax.set_xlabel('contrast')
_ = ax1.set_ylabel('detected lower')
_ = ax1.legend(['lower', 'upper'], loc='right')
_ = ax2.set_ylabel('detected upper')
_ = ax2.legend(['lower', 'upper'], loc='right')
(note the subplots are equivalent up to a flip)
One could not fit psychometric curves for this 2AFC. See for instance https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3110332/
let's explore another way:
import numpy as np
import pylab
from scipy.optimize import curve_fit
def sigmoid(c, c0, k):
    """Logistic psychometric function mapping contrast c into (0, 1).

    c0 is the inflection point (output 0.5 there) and k the slope.
    """
    z = k * (c - c0)
    return 1.0 / (1.0 + np.exp(-z))
# Fit one logistic psychometric curve per v1 session and overlay the fits.
# Loop indentation restored; Python 2 print converted to print().
for fn in glob.glob('../data/competing_v1_*npy'):
    results = np.load(fn)
    # Map reported direction from {-1, +1} to {0, 1} for the fit.
    cdata, ydata = results[1, :], .5*results[0, :]+.5
    pylab.plot(cdata, ydata, 'o')
    popt, pcov = curve_fit(sigmoid, cdata, ydata)
    print(popt)  # fitted (c0, k)
    c = np.linspace(0, 1, 50)
    y = sigmoid(c, *popt)
    pylab.plot(c, y)
pylab.ylim(-.05, 1.05)
pylab.legend(loc='best')
Analysis, version 2¶
In the second version, we also stored some information about the experiment
from psychopy import misc
# v2 sessions store the experiment parameters in a .pickle next to the
# .npy results. Fit a logistic curve per session and report the average
# fitted mean and slope. Loop indentation restored; prints are Python 3.
mean, slope = [], []
for fn in glob.glob('../data/competing_v2_*npy'):
    results = np.load(fn)
    data = misc.fromFile(fn.replace('npy', 'pickle'))
    print(data)
    cdata, ydata = results[1, :], .5*results[0, :]+.5
    pylab.plot(cdata, ydata, 'o')
    popt, pcov = curve_fit(sigmoid, cdata, ydata)
    mean.append(popt[0])
    slope.append(popt[1])
    c = np.linspace(0, 1, 50)
    y = sigmoid(c, *popt)
    pylab.plot(c, y)
pylab.text(0.05, 0.8, 'mean : %0.2f , slope : %0.2f ' %(np.mean(mean), np.mean(slope)))
pylab.ylim(-.05, 1.05)
pylab.legend(loc='best')
Testing discrimination between Motion Clouds with psychopy
Analysis of a discrimination experiment¶
In the psychopy_discrimination.py script, we implemented an experiment to test whether the overall shape of motion clouds could influence discrimination of speed.
Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.
# --- Section setup (discrimination-experiment analysis): standard imports. ---
import numpy as np
np.set_printoptions(precision=3, suppress=True)  # compact array printing
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
The experiment¶
#%pycat psychopy_discrimination.py
Analysis - version 1¶
In a first version of the experiment, we only stored the results in a numpy file.
!ls ../data/discriminating_*
!ls ../data/
import glob
for fn in glob.glob('../data/discriminating_*npy'):
results = np.load(fn)
print fn, results.shape
print('analyzing results')
print '# of trials :', np.abs(results).sum(axis=1)
print 'average results: ', (results.sum(axis=1)/np.abs(results).sum(axis=1)*.5+.5)
%whos
Another solution is to use:
#data= 1
#np.savez('toto', data=data, results=results)
#print np.load('toto.npz')['data']
or directly psychopy.misc:
from psychopy import misc
#info = misc.fromFile('../data/discriminating.pickle')
# NOTE(review): `info` (and `results`) are not defined in this cell -- it only
# runs if the fromFile line above is restored and results were loaded earlier;
# confirm before re-executing.
alphas = [-1., -.5, 0., 0.5, 1., 1.5, 2.]
fileName = '../data/discriminating_' + info['observer'] + '.pickle'
info['alphas'] = alphas
info['results'] = results
#numpy.savez(fileName, results=results, alphas=alphas)
misc.toFile(fileName, info)  # persist parameters and results in one pickle
Analysis - version 2¶
In the second version, we stored more data:
import glob
from psychopy import misc
# v2 sessions: everything (alphas + results) lives in one pickle per session.
# Plot the proportion of correct answers as a function of alpha.
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('../data/discriminating_v2_*pickle'):
    data = misc.fromFile(fn)
    # BUG FIX: this used to print the stale `results` array left over from an
    # earlier cell; report the results stored in *this* file instead.
    print(fn, data['results'].shape)
    print('analyzing results')
    alphas = np.array(data['alphas'])
    print(' alphas = ', alphas)
    print('# of trials :', np.abs(data['results']).sum(axis=1))
    print('average results: ', (data['results'].sum(axis=1)/np.abs(data['results']).sum(axis=1)*.5+.5))
    width = (alphas.max()-alphas.min())/len(alphas)
    ax.bar(data['alphas'] - width/2, (data['results'].sum(axis=1)/np.abs(data['results']).sum(axis=1)*.5+.5), width=width, alpha=.3 )
    # for i_trial in xrange(data['results'].shape[1]):
    #     ax.scatter(data['alphas'][data['results']
ax.set_xlabel('alpha')
ax.set_ylabel('% correct')
# TODO : correct for the number of trials / make global reading / make a new draw of alphas
# TODO : test f_0
Testing basic functions of Motion Clouds
Motion Clouds: raw principles¶
Motion Clouds are synthesized textures which aim at having similar characteristics as natural images but with controlled parameters. There are many ways to achieve these results and this notebook aims at showing that different procedures from different communities (neuroscience, modelling, computer vision, ...) may produce similar results.
# --- Section setup (raw principles): numerics + plotting imports. ---
import numpy as np
np.set_printoptions(precision=3, suppress=True)  # compact array printing
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
Using Fourier ("official Motion Clouds")¶
# Make the MotionClouds package importable from the repository root.
import sys
sys.path.append('..')
import MotionClouds as mc
# Fourier-domain grids at the default resolution.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
Using mixtures of images¶
import scipy.misc
# NOTE(review): scipy.misc.lena() was removed from SciPy (0.17+); with a
# modern SciPy this cell needs another grayscale test image
# (e.g. scipy.datasets.ascent()) -- confirm the environment.
lena = scipy.misc.lena() * 1.  # promote to float
# Normalize to zero mean and unit variance.
lena -= lena.mean()
lena /= lena.std()
print(lena.shape)  # was a Python 2 print statement
plt.imshow(lena, cmap=plt.cm.gray)
lena.shape
lena[0, :]
def noise(image=None):
    """Return a randomly translated (circularly wrapped) copy of `image`.

    Each axis is rolled by an independent random offset. Defaults to the
    module-level `lena` image; the default is now resolved at call time
    instead of being captured at definition time (the original
    `image=lena` default froze whatever `lena` was when the def ran).
    The input array itself is never modified (np.roll returns a copy).
    """
    if image is None:
        image = lena
    for axis in [0, 1]:
        image = np.roll(image, np.random.randint(image.shape[axis]), axis=axis)
    return image
# Two independent draws: each call to noise() applies a fresh random shift.
plt.imshow(noise(), cmap=plt.cm.gray)
plt.imshow(noise(), cmap=plt.cm.gray)
Using ARMA processes¶
Now, we define the ARMA process as an averaging process with a certain time constant \(\tau=30.\) (in frames).
def ARMA(image, tau=30., innovation=None):
    """One step of a first-order autoregressive (leaky-integrator) process.

    Returns (1 - 1/tau) * image + (1/tau) * innovation, i.e. an exponential
    moving average with time constant `tau` (in frames). When `innovation`
    is None a fresh random frame is drawn with noise(), exactly as before;
    passing it explicitly makes the step deterministic (new,
    backward-compatible parameter).
    """
    if innovation is None:
        innovation = noise()
    return (1 - 1/tau) * image + 1/tau * innovation
initializing
# Initialize the recursion from the normalized image, then iterate the
# AR(1) step many times, displaying a snapshot every 1000 iterations.
image = ARMA(lena)
plt.imshow(image, cmap=plt.cm.gray)
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
%cd..
N_frame = 1024
z = np.zeros((lena.shape[0], lena.shape[1], N_frame))
z[:, :, 0] = image
for i_frame in range(1, N_frame):
z[:, :, i_frame] = ARMA(z[:, :, i_frame-1])
mc.anim_save(.5 + .5*z, filename='results/arma')
mc.notebook = True
mc.in_show_video(name='arma')